Statistical Methods in Data Science, Part 6: Lab 6.4


Yo, folks — this time 罗老师 has no energy left for wisecracks. Besides the homework, the lab itself is pretty hard, because even the code in the book needs debugging.
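(For the record, the main offender — assuming you are on a recent glmnet, version 2.0 or later, which is my guess for why the book's code breaks — is `predict()` with `exact=T`: it now refits at the requested lambda and therefore needs the training data passed back in. A minimal before/after sketch, reusing the objects from the ridge-regression chunk below:)

```r
# As printed in the book -- errors on glmnet >= 2.0, because exact=TRUE
# refits at the new lambda and so needs the original training data back:
# ridge.pred = predict(ridge.mod, s = 0, newx = x[test, ], exact = T)

# Debugged call (the one the source below actually uses): resupply x and y
ridge.pred = predict(ridge.mod, s = 0, newx = x[test, ],
                     exact = T, x = x[train, ], y = y[train])
```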

Here's a picture of spring to boost your faith points.

Below is the Rmd source for this lab:

---
title: "Lab 6.4-6.7"
author:
- 罗力铭
documentclass: ctexart
keywords:
- Lab 6.4-6.7
output:
  rticles::ctex:
    fig_caption: yes
    number_sections: yes
    toc: yes
---

# 6.5 Lab 1: Subset Selection Methods

## 6.5.1 Best Subset Selection

```{r}
library(ISLR)
# fix(Hitters) opens an interactive data editor and stalls knitting, so it is commented out
# fix(Hitters)
names(Hitters)
dim(Hitters)
sum(is.na(Hitters$Salary))
Hitters=na.omit(Hitters)
sum(is.na(Hitters))
library(leaps)
regfit.full=regsubsets(Salary~.,Hitters)
summary(regfit.full)
regfit.full=regsubsets(Salary~.,data=Hitters,nvmax=19)
reg.summary=summary(regfit.full)
names(reg.summary)
reg.summary$rsq
par(mfrow=c(2,2))
plot(reg.summary$rss,xlab="Number of Variables",ylab="RSS",type="l")
plot(reg.summary$adjr2,xlab="Number of Variables",ylab="Adjusted RSq",type="l")
which.max(reg.summary$adjr2)
points(11,reg.summary$adjr2[11],col="red",cex=2,pch=20)
plot(reg.summary$cp,xlab="Number of Variables",ylab="Cp",type="l")
which.min(reg.summary$cp)
points(10,reg.summary$cp[10],col="red",cex=2,pch=20)
which.min(reg.summary$bic)
plot(reg.summary$bic,xlab="Number of Variables",ylab="BIC",type="l")
points(6,reg.summary$bic[6],col="red",cex=2,pch=20)
plot(regfit.full,scale="r2")
plot(regfit.full,scale="adjr2")
plot(regfit.full,scale="Cp")
plot(regfit.full,scale="bic")
coef(regfit.full,6)
```

## 6.5.2 Forward and Backward Stepwise Selection

```{r}
regfit.fwd=regsubsets(Salary~.,data=Hitters,nvmax=19,method="forward")
summary(regfit.fwd)
regfit.bwd=regsubsets(Salary~.,data=Hitters,nvmax=19,method="backward")
summary(regfit.bwd)
coef(regfit.full,7)
coef(regfit.fwd,7)
coef(regfit.bwd,7)
```

## 6.5.3 Choosing Among Models Using the Validation Set Approach and Cross-Validation

```{r}
set.seed(1)
train=sample(c(TRUE,FALSE),nrow(Hitters),rep=TRUE)
test=(!train)
regfit.best=regsubsets(Salary~.,data=Hitters[train,],nvmax=19)
test.mat=model.matrix(Salary~.,data=Hitters[test,])
val.errors=rep(NA,19)
for(i in 1:19){
  coefi=coef(regfit.best,id=i)
  pred=test.mat[,names(coefi)]%*%coefi
  val.errors[i]=mean((Hitters$Salary[test]-pred)^2)
}
val.errors
which.min(val.errors)
coef(regfit.best,10)
# regsubsets() ships no predict() method, so define one for the CV loop below
predict.regsubsets=function(object,newdata,id,...){
  form=as.formula(object$call[[2]])
  mat=model.matrix(form,newdata)
  coefi=coef(object,id=id)
  xvars=names(coefi)
  mat[,xvars]%*%coefi
}
regfit.best=regsubsets(Salary~.,data=Hitters,nvmax=19)
coef(regfit.best,10)
k=10
set.seed(1)
folds=sample(1:k,nrow(Hitters),replace=TRUE)
cv.errors=matrix(NA,k,19,dimnames=list(NULL,paste(1:19)))
for(j in 1:k){
  best.fit=regsubsets(Salary~.,data=Hitters[folds!=j,],nvmax=19)
  for(i in 1:19){
    pred=predict(best.fit,Hitters[folds==j,],id=i)
    cv.errors[j,i]=mean((Hitters$Salary[folds==j]-pred)^2)
  }
}
mean.cv.errors=apply(cv.errors,2,mean)
mean.cv.errors
par(mfrow=c(1,1))
plot(mean.cv.errors,type="b")
reg.best=regsubsets(Salary~.,data=Hitters,nvmax=19)
coef(reg.best,11)
```

# 6.6 Lab 2: Ridge Regression and the Lasso

```{r}
x=model.matrix(Salary~.,Hitters)[,-1]
y=Hitters$Salary
```

## 6.6.1 Ridge Regression

```{r}
library(glmnet)
grid=10^seq(10,-2,length=100)
ridge.mod=glmnet(x,y,alpha=0,lambda=grid)
dim(coef(ridge.mod))
ridge.mod$lambda[50]
coef(ridge.mod)[,50]
sqrt(sum(coef(ridge.mod)[-1,50]^2))
ridge.mod$lambda[60]
sqrt(sum(coef(ridge.mod)[-1,60]^2))
predict(ridge.mod,s=50,type="coefficients")[1:20,]
set.seed(1)
train=sample(1:nrow(x),nrow(x)/2)
test=(-train)
y.test=y[test]
ridge.mod=glmnet(x[train,],y[train],alpha=0,lambda=grid,thresh=1e-12)
ridge.pred=predict(ridge.mod,s=4,newx=x[test,])
mean((ridge.pred-y.test)^2)
mean((mean(y[train])-y.test)^2)
ridge.pred=predict(ridge.mod,s=1e10,newx=x[test,])
mean((ridge.pred-y.test)^2)
# exact=T needs the original x and y under recent glmnet (the book-code fix)
ridge.pred=predict(ridge.mod,s=0,newx=x[test,],exact=T,x=x[train,],y=y[train])
mean((ridge.pred-y.test)^2)
lm(y~x,subset=train)
predict(ridge.mod,s=0,exact=T,type="coefficients",x=x[train,],y=y[train])[1:20,]
set.seed(1)
cv.out=cv.glmnet(x[train,],y[train],alpha=0)
plot(cv.out)
bestlam=cv.out$lambda.min
bestlam
ridge.pred=predict(ridge.mod,s=bestlam,newx=x[test,])
mean((ridge.pred-y.test)^2)
out=glmnet(x,y,alpha=0)
predict(out,type="coefficients",s=bestlam)[1:20,]
```

## 6.6.2 The Lasso

```{r}
lasso.mod=glmnet(x[train,],y[train],alpha=1,lambda=grid)
plot(lasso.mod)
set.seed(1)
cv.out=cv.glmnet(x[train,],y[train],alpha=1)
plot(cv.out)
bestlam=cv.out$lambda.min
lasso.pred=predict(lasso.mod,s=bestlam,newx=x[test,])
mean((lasso.pred-y.test)^2)
out=glmnet(x,y,alpha=1,lambda=grid)
lasso.coef=predict(out,type="coefficients",s=bestlam)[1:20,]
lasso.coef
lasso.coef[lasso.coef!=0]
```

# 6.7 Lab 3: PCR and PLS Regression

## 6.7.1 Principal Components Regression

```{r}
library(pls)
set.seed(2)
pcr.fit=pcr(Salary~.,data=Hitters,scale=TRUE,validation="CV")
summary(pcr.fit)
validationplot(pcr.fit,val.type="MSEP")
set.seed(1)
pcr.fit=pcr(Salary~.,data=Hitters,subset=train,scale=TRUE,validation="CV")
validationplot(pcr.fit,val.type="MSEP")
pcr.pred=predict(pcr.fit,x[test,],ncomp=7)
mean((pcr.pred-y.test)^2)
pcr.fit=pcr(y~x,scale=TRUE,ncomp=7)
summary(pcr.fit)
```

## 6.7.2 Partial Least Squares

```{r}
set.seed(1)
pls.fit=plsr(Salary~.,data=Hitters,subset=train,scale=TRUE,validation="CV")
summary(pls.fit)
validationplot(pls.fit,val.type="MSEP")
pls.pred=predict(pls.fit,x[test,],ncomp=2)
mean((pls.pred-y.test)^2)
pls.fit=plsr(Salary~.,data=Hitters,scale=TRUE,ncomp=2)
summary(pls.fit)
```
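One more spot worth flagging in the code above: `leaps::regsubsets()` does not come with a `predict()` method, which is why section 6.5.3 defines `predict.regsubsets()` by hand. It pulls the model formula out of `object$call[[2]]`, rebuilds the design matrix for the new data, and multiplies it by the coefficients of the chosen sub-model. A minimal usage sketch, reusing the objects defined above:

```r
# The fitted object has S3 class "regsubsets", so a plain predict() call
# dispatches to the hand-rolled predict.regsubsets() from section 6.5.3:
regfit.best = regsubsets(Salary ~ ., data = Hitters, nvmax = 19)
head(predict(regfit.best, Hitters, id = 10))  # fitted values of the 10-variable model
```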

That's all for this installment.
